svm: Replace call to svm_load_cr2 with conditional call to svm_trace_vmentry
Author: kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Thu, 24 May 2007 12:56:58 +0000 (13:56 +0100)
Committer: kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Thu, 24 May 2007 12:56:58 +0000 (13:56 +0100)
Remove the call to svm_load_cr2 (which doesn't do anything useful).
The old svm_load_cr2 is now replaced with svm_trace_vmentry. This
function is called only if "tb_init_done" is non-zero, so we don't pay
for the call unless tracing is actually in use.

Signed-off-by: Mats Petersson <mats.petersson@amd.com>
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/svm/x86_32/exits.S
xen/arch/x86/hvm/svm/x86_64/exits.S

index f7c6efb3ec9432e7c6bed1d01298df4384835714..669fa4b7db13510e626460a3fdd21d83d2a6245b 100644 (file)
@@ -2520,14 +2520,12 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
     }
 }
 
-asmlinkage void svm_load_cr2(void)
+asmlinkage void svm_trace_vmentry(void)
 {
     struct vcpu *v = current;
 
     /* This is the last C code before the VMRUN instruction. */
     HVMTRACE_0D(VMENTRY, v);
-
-    asm volatile ( "mov %0,%%cr2" : : "r" (v->arch.hvm_svm.cpu_cr2) );
 }
   
 /*
index cb2bebb36ca7427ccffc507ca005da1fef5a82f1..ba5c064b68c634b866ac491fbe4171e088c09c6d 100644 (file)
 ENTRY(svm_asm_do_resume)
         GET_CURRENT(%ebx)
         CLGI
-        /* Run ASID stuff. */
-        call svm_asid_handle_vmrun
 
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
         testl $~0,irq_stat(%eax,1)
         jnz  svm_process_softirqs
+
+        call svm_asid_handle_vmrun
         call svm_intr_assist
-        call svm_load_cr2
+
+        /* Check if the trace buffer is initialized. 
+         * Because the below condition is unlikely, we jump out of line
+         * instead of having a mostly taken branch over the unlikely code.
+         */
+        cmpb $0,tb_init_done
+        jnz  svm_trace
+svm_trace_done:
 
         movl VCPU_svm_vmcb(%ebx),%ecx
         movl UREGS_eax(%esp),%eax
@@ -89,3 +96,11 @@ svm_process_softirqs:
         STGI
         call do_softirq
         jmp  svm_asm_do_resume
+
+svm_trace:
+        /* Call out to C, as this is not speed critical path
+         * Note: svm_trace_vmentry will recheck the tb_init_done,
+         * but this is on the slow path, so who cares 
+         */
+        call svm_trace_vmentry
+        jmp  svm_trace_done
index 98f13a1be898ab9058a8be6fef9d51da44368098..e991bb4cd4f1bc83d2a2d77a8d0bae31fd713b62 100644 (file)
 ENTRY(svm_asm_do_resume)
         GET_CURRENT(%rbx)
         CLGI
-        /* Run ASID stuff. */
-        call svm_asid_handle_vmrun
 
         movl VCPU_processor(%rbx),%eax
         shl  $IRQSTAT_shift,%rax
         leaq irq_stat(%rip),%rdx
         testl $~0,(%rdx,%rax,1)
         jnz  svm_process_softirqs
+
+        call svm_asid_handle_vmrun
         call svm_intr_assist
-        call svm_load_cr2
+
+        /* Check if the trace buffer is initialized. 
+         * Because the below condition is unlikely, we jump out of line
+         * instead of having a mostly taken branch over the unlikely code.
+         */
+        cmpb $0,tb_init_done(%rip)
+        jnz  svm_trace
+svm_trace_done:
 
         movq VCPU_svm_vmcb(%rbx),%rcx
         movq UREGS_rax(%rsp),%rax
@@ -106,3 +113,11 @@ svm_process_softirqs:
         STGI
         call do_softirq
         jmp  svm_asm_do_resume
+
+svm_trace:
+        /* Call out to C, as this is not speed critical path
+         * Note: svm_trace_vmentry will recheck the tb_init_done,
+         * but this is on the slow path, so who cares 
+         */
+        call svm_trace_vmentry
+        jmp  svm_trace_done